In [1]:
#The following code generates the summary of the trading activities of the stock of TLS
In [2]:
# Silence all warnings so the rendered report stays clean
# (pandas SettingWithCopyWarning is raised frequently below).
import warnings
warnings.filterwarnings("ignore")
from IPython.display import HTML
# Inject a small jQuery snippet that hides every input (code) cell by default
# and renders a link that toggles code visibility for the reader.
HTML('''<script>
code_show=true; 
function code_toggle() {
 if (code_show){
 $('div.input').hide();
 } else {
 $('div.input').show();
 }
 code_show = !code_show
} 
$( document ).ready(code_toggle);
</script>
The raw code for this IPython notebook is by default hidden for easier reading.
To toggle on/off the raw code, click <a href="javascript:code_toggle()">here</a>.''')
Out[2]:
The raw code for this IPython notebook is by default hidden for easier reading. To toggle on/off the raw code, click here.
In [3]:
import os
import pandas as pd
import numpy as np
import datetime
import copy
#Reading the file: sheet_name=None loads every sheet into a
#{sheet_name: DataFrame} dict (one sheet per trading day)
cdf = pd.read_excel("TLS  Course of Sales.xlsx", sheet_name=None)
#Concatenating all the sheets to a single flat sheet
out_df = pd.concat(cdf,ignore_index=True)
#Saving the resultant dataframe to a csv for later reuse
out_df.to_csv("SingleSheetTLS.csv")
In [4]:
import os
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from datetime import datetime
# NOTE(review): star import pollutes the namespace (pylab re-exports numpy
# and pyplot names such as min/max); kept as-is to avoid breaking any
# implicit dependency elsewhere in the notebook — confirm before removing.
from pylab import *
import copy
import matplotlib.ticker as tick
import warnings
warnings.filterwarnings("ignore")
# Excel writer that accumulates every summary table produced below.
# NOTE(review): the workbook is only flushed to disk when the writer is
# saved/closed — no .save()/.close() call is visible in this chunk; verify
# it happens at the end of the notebook.
TLS_writer = pd.ExcelWriter('TLS_Summary_check.xlsx', engine='xlsxwriter')
In [5]:
#Reading the file and storing the sheets in a dictionary
xl_file = pd.ExcelFile("TLS  Course of Sales.xlsx")
dfs = {sheet_name: xl_file.parse(sheet_name) 
          for sheet_name in xl_file.sheet_names}
#Removing the empty sheets and their corresponding keys
tempdfs=copy.deepcopy(dfs)
for key in dfs.keys():
    if(len(dfs[key])==0):
        del tempdfs[key]
dfs=tempdfs

#Creating daily_dataframe to store the daily calculated measures
daily_dataframe={}
daily_dataframe['Date']=[]
daily_dataframe['AvgValue'] = []
daily_dataframe['AvgVolume'] = []
daily_dataframe['NumTrades'] = []

#Iterating over all the keys
for key in dfs.keys():
    tdf=dfs[key]
    nine_to_four_df=tdf
    nine_to_four_df=nine_to_four_df.sort_values(by="TradeTime")
    #Extracting the relevant information
    date=nine_to_four_df.iloc[0]["TradeDate"].date()
    avgvalue=nine_to_four_df['TradeValue'].sum()
    avgvolume=nine_to_four_df['TradeVolume'].sum()
    numtrades=len(nine_to_four_df)
    #Appending the information to their corresponding lists
    daily_dataframe['Date'].append(date)
    daily_dataframe['AvgValue'].append(avgvalue)
    daily_dataframe['AvgVolume'].append(avgvolume)
    daily_dataframe['NumTrades'].append(numtrades)

#Saving the output in a data-frame
TLS_daily_df=pd.DataFrame.from_dict(daily_dataframe)

Daily Trading Stats

In [6]:
def generate_date2(date):
    """Return the day-of-month of an ISO-formatted (YYYY-MM-DD) date value.

    Works for anything whose str() renders as YYYY-MM-DD (datetime.date,
    pandas Timestamp.date(), or a plain string); used to build compact
    x-axis labels for the daily bar chart below.
    """
    day_part = str(date).split("-")[2]
    return int(day_part)

TLS_daily_df=pd.DataFrame.from_dict(daily_dataframe)
#Generating the day-of-month labels used on the x-axis of the bar chart below
TLS_daily_df["Date2"]=TLS_daily_df["Date"].apply(generate_date2)
#Sort the dataframe by dates for plot (sheets were iterated in dict order)
TLS_daily_df=TLS_daily_df.sort_values(by=['Date'])
#Writing the dataframe in the summary file
TLS_daily_df.to_excel(TLS_writer, sheet_name='Tradingpattern_daily_stats')
In [7]:
# Create matplotlib figure
fig = plt.figure() 
ax = fig.add_subplot(111) 
#Adding the bar plot for DollarValue (drawn on the current axes, i.e. `ax`)
TLS_daily_df["AvgValue"].plot(kind='bar',color='y')
#Adding the line plot for number of trades on a secondary (right-hand) y-axis
TLS_daily_df['NumTrades'].plot(kind='line', secondary_y=True)
#setting the both the axis labels (Date2 holds the day-of-month values)
ax.set_xticklabels(TLS_daily_df["Date2"])
ax.set_ylabel('DollarValue in millions')
ax.set_xlabel('Dates')
#creating function for formatting the y-axis labels in a human-readable format
def y_fmt(y, pos):
    """Matplotlib tick formatter: render `y` with a G/M/k magnitude suffix.

    Examples: 0 -> "0", 1000 -> "1 k", 1500 -> "1.5 k", 2500000 -> "2.5 M".
    `pos` is the tick position (required by FuncFormatter, unused here).
    Values with |y| < 1 are returned unchanged.
    """
    decades = [1e9, 1e6, 1e3, 1e0]
    suffix  = ["G", "M", "k", ""]
    if y == 0:
        return str(0)
    for d, s in zip(decades, suffix):
        if np.abs(y) >= d:
            val = y / float(d)
            # Integral values print without a decimal part ("1 k", not "1.0 k").
            if float(val).is_integer():
                return '{val:d} {suffix}'.format(val=int(round(val)), suffix=s)
            # Fractional values keep their digits but drop trailing zeros.
            # (The original parsed str(val).split(".")[1] for this, which
            # raised IndexError once repr switched to scientific notation,
            # e.g. val = 1e+16, and depended on float repr quirks.)
            text = '{val:f}'.format(val=val).rstrip('0').rstrip('.')
            return '{val} {suffix}'.format(val=text, suffix=s)
    return y

#Applying the human-readable formatter to the left (dollar-value) y-axis
ax.yaxis.set_major_formatter(tick.FuncFormatter(y_fmt))
#Adding title and legends to the chart
fig.suptitle("Trading Stats of TLS Aug 2019", fontsize=20)
plt.legend(loc='upper right')
plt.style.use('seaborn')
plt.show()

Distribution of TradeCategory

In [8]:
import os
import pandas as pd
import numpy as np
import datetime
import copy
#Reading the file (all sheets) 
cdf = pd.read_excel("TLS  Course of Sales.xlsx", sheet_name=None)
#Concatenating all the sheets to a single sheet
out_df = pd.concat(cdf,ignore_index=True)

#Extracting only the ChiX trades (DataSource "CXA")
ChiXdf=out_df.loc[out_df["DataSource"]=="CXA"]

#Grouping the trade condcodes into a tradetype and create a column for it
#NOTE(review): .astype("category", categories=...) is a pre-0.25 pandas API;
#newer pandas requires pd.CategoricalDtype — confirm the pinned pandas version.
m = {"CHIXblock": ["SP","SX","SXDE"], "CHIXother": ["PC","PO","ET","MC","LT"], "CHIXNBBO": ["NX","NXXT"],"CHIXon-market hidden":["HL","BR"], "CHIXon-market lit": [np.nan,"XT","S","B"]}
m2 = {v: k for k,vv in m.items() for v in vv}
ChiXdf["Tradetype"]=ChiXdf.CondCodes.map(m2).astype("category", categories=set(m2.values()))

#Re-reading the file for the ASX leg (keeps out_df pristine for the slices below)
cdf = pd.read_excel("TLS  Course of Sales.xlsx", sheet_name=None)
#Concatenating all the sheets to a single sheet
out_df = pd.concat(cdf,ignore_index=True)
#Trades with reason=MATCH are auction candidates
matching=out_df.loc[out_df["Reason"]=="MATCH"]
#All remaining trades belong to continuous trading
non_auction=out_df.loc[out_df["Reason"]!="MATCH"]

from datetime import time
A=time(hour =9, minute = 59, second = 45)
B=time(hour =10, minute = 10, second = 0)
#MATCH trades inside the 09:59:45-10:10:00 window are the opening auction
openingauction=matching.loc[(matching['TradeTime']>=A)&(matching['TradeTime']<=B)]

from datetime import time
A=time(hour =16, minute = 0, second = 0)
B=time(hour =16, minute = 12, second = 0)
#MATCH trades inside the 16:00:00-16:12:00 window are the closing auction
closingauction=matching.loc[(matching['TradeTime']>=A)&(matching['TradeTime']<=B)]

#Tagging the auction trades with synthetic CondCodes
#(writes on slices of `matching`; relies on warnings being suppressed)
openingauction["CondCodes"]="OpeningAuction"
closingauction["CondCodes"]="ClosingAuction"

#Concatenating opening and closing auction dataframes to create total auction dataframe
auction=pd.concat([openingauction,closingauction])
#Concatenating auction and non-auction dataframes to create total dataframe
totaldf=pd.concat([auction,non_auction])
#Extracting only the ASX trades (DataSource "TM")
asxdf=totaldf.loc[totaldf["DataSource"]=="TM"]

#Creating blocklist for tradetypes (each code also exists with an "XT" suffix)
temp=["SX","SP","SA","S0","S1","S2","S3","P1","P2","PT"]
trade_blocklist=["SX","SP","SA","S0","S1","S2","S3","P1","P2","PT"]
for val in temp:
    trade_blocklist.append(val+str("XT"))

#Creating otherlist for tradetypes (same "XT"-suffix expansion)
temp=["L","LT","L1","L2","L3","L4","L5"]
trade_otherlist=["L","LT","L1","L2","L3","L4","L5"]
for val in temp:
    trade_otherlist.append(val+str("XT"))

trade_optionslist=["EC","ECXT","EP","EPXT"]
trade_misc=["OSXT","ET","ETXT"]
trade_otherlist=trade_otherlist+trade_optionslist+trade_misc

#Grouping the trade condcodes into a tradetype and create a column for it
mapoftradetype = {"ASXOpenAuction": ["OpeningAuction"], "ASXCloseAuction": ["ClosingAuction"], "ASXNBBO": ["NX","NXXT"],"ASXCentrePt":["CX","CP","CXXT"],"ASXCentrept-Pref":["CE","CEXT"], "ASXOn-market lit": [np.nan,"XT","EQTM"],"ASXBlock":trade_blocklist,"ASXOthers":trade_otherlist}
mappedtradetype = {v: k for k,vv in mapoftradetype.items() for v in vv}
asxdf["Tradetype"]=asxdf.CondCodes.map(mappedtradetype).astype("category", categories=set(mappedtradetype.values()))
In [9]:
#Concatenating the categorised ASX and Chi-X trades into one dataframe
tempdf=pd.concat([asxdf,ChiXdf])
In [10]:
#Calculating the sum of trades in each tradetype
#Grouping the dataframe based on the tradetype (numeric columns are summed)
Group_Trade_df=tempdf.groupby("Tradetype").sum()
#Resetting the index to get the labels
Group_Trade_df.reset_index(inplace=True)

from collections import Counter 
#Using Counter to count the occurrence of each tradetype (trade counts, not volume)
Counter_trades = Counter(tempdf["Tradetype"])
#Summing up all the values
totaltrades=sum(list(Counter_trades.values()))
#Creating a list of the labels
labels=list(Counter_trades.keys())
sizes=list(Counter_trades.values())
#Calculating the fractional share of each tradetype
sizes=np.array(sizes)/totaltrades
listsizes=list(sizes)
#Saving into a dataframe
TLS_temp_df = pd.DataFrame(list(zip(labels,listsizes)))
#Writing the dataframe in the summary file
TLS_temp_df.to_excel(TLS_writer, sheet_name='Categories_numberoftrades')

TradeValue

In [22]:
#Calculating the share (by traded dollar value) of each tradetype
sizes=Group_Trade_df["TradeValue"]/sum(Group_Trade_df["TradeValue"])
labels=Group_Trade_df["Tradetype"]
listsizes=list(sizes)
#Saving into a dataframe
TLS_temp_df = pd.DataFrame(list(zip(labels,listsizes)))
#Writing the dataframe in the summary file
TLS_temp_df.to_excel(TLS_writer, sheet_name='Categories_tradevalues')

# Fixed palette: warm tones for ASX categories, cool tones for Chi-X, so the
# two venues are visually separable in the pie chart.
# (The original also built an unused `color_val` dict here; dead code removed.)
color_dict=dict({"ASXBlock":"#F08080",
 "ASXCentrePt":"#E9967A",
 "ASXNBBO":"#B22222",
 "ASXCentrept-Pref":"#DB7090",
 "ASXCloseAuction":"#FA8072",
 "ASXOn-market lit":"#FF4500",
 "ASXOpenAuction":"#FDA97A",
 "ASXOthers":"#CD5C5c",
 "CHIXNBBO":"#87CEFA",
 "CHIXblock":"#000080",
 "CHIXon-market hidden":"#1E90FF",
 "CHIXon-market lit":"#00BFFF",
 "CHIXother":"#6A5ACD",
})

#Colour for each slice, in label order
colors=[color_dict[label] for label in labels]

import plotly.graph_objects as go
import plotly.offline as pyo
pyo.offline.init_notebook_mode()
layout=dict(title="TradeValues",
autosize=False,
height = 1000,
width = 1000,
)
#Donut chart of the trade-value share per tradetype (sort=False keeps label order)
fig = go.Figure(data=[go.Pie(labels=labels, values=sizes,marker_colors=colors,sort=False)],layout=layout)
fig.update_traces(hole=.4,textfont_size=25,hoverinfo='label+percent')
fig.update_layout(
    title=go.layout.Title(
        font=dict(size=35, family='Courier', color='black'),
        text="TradeValues",
        xref='paper',
        x=0.5
    ),legend=dict(x=-.2, y=1.2)
    
)
fig.show()

TradeVolume

In [23]:
#Calculating the share (by traded volume) of each tradetype
sizes=Group_Trade_df["TradeVolume"]/sum(Group_Trade_df["TradeVolume"])
labels=Group_Trade_df["Tradetype"]
listsizes=list(sizes)
#Saving into a dataframe and writing it in the summary file
TLS_temp_df = pd.DataFrame(list(zip(labels,listsizes)))
TLS_temp_df.to_excel(TLS_writer, sheet_name='Categories_tradeVolume')

# Fixed palette: warm tones for ASX categories, cool tones for Chi-X
# (same palette as the trade-value chart for visual consistency).
# (The original also built an unused `color_val` dict here; dead code removed.)
color_dict=dict({"ASXBlock":"#F08080",
 "ASXCentrePt":"#E9967A",
 "ASXNBBO":"#B22222",
 "ASXCentrept-Pref":"#DB7090",
 "ASXCloseAuction":"#FA8072",
 "ASXOn-market lit":"#FF4500",
 "ASXOpenAuction":"#FDA97A",
 "ASXOthers":"#CD5C5c",
 "CHIXNBBO":"#87CEFA",
 "CHIXblock":"#000080",
 "CHIXon-market hidden":"#1E90FF",
 "CHIXon-market lit":"#00BFFF",
 "CHIXother":"#6A5ACD",
})

#Colour for each slice, in label order
colors=[color_dict[label] for label in labels]

import plotly.graph_objects as go
import plotly.offline as pyo
pyo.offline.init_notebook_mode()
layout=dict(title="TradeVolume",
autosize=False,
height = 1000,
width = 1000,
)
#Donut chart of the trade-volume share per tradetype (sort=False keeps label order)
fig = go.Figure(data=[go.Pie(labels=labels, values=sizes,marker_colors=colors,sort=False)],layout=layout)
fig.update_traces(hole=.4,textfont_size=25,hoverinfo='label+percent')
fig.update_layout(
    title=go.layout.Title(
        font=dict(size=35, family='Courier', color='black'),
        text="TradeVolume",
        xref='paper',
        x=0.5
    )
    ,legend=dict(x=-.2, y=1.2)
    
)
fig.show()

Price Pressure

In [13]:
import os
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from datetime import datetime
import copy
from datetime import time
#Reading the file and storing the sheets in a dictionary (one sheet per trading day)
xl_file = pd.ExcelFile("TLS  Course of Sales.xlsx")
dfs = {sheet_name: xl_file.parse(sheet_name) 
          for sheet_name in xl_file.sheet_names}
#Removing the empty sheets and their corresponding keys
tempdfs=copy.deepcopy(dfs)
for key in dfs.keys():
    if(len(dfs[key])==0):
        del tempdfs[key]

#Creating daily_dataframe to store, per day, the volume share of each trade reason
dfs=tempdfs
daily_dataframe={}
daily_dataframe['Date']= []
daily_dataframe['ask']= []
daily_dataframe['bid']= []
daily_dataframe['match']= []
daily_dataframe['nan']= []

#Iterating over the per-day sheets
for key in dfs.keys():
    tdf=dfs[key]
    out_df=copy.deepcopy(tdf)
    tempdf=copy.deepcopy(out_df)
    #Encode the trade Reason as a category: "1"=ASK, "2"=BID, "3"=MATCH, "4"=missing
    price_pressure = {"1": ["ASK"], "2": ["BID"], "3": ["MATCH"],"4":[np.nan]}
    price_pressure_values = {v: k for k,vv in price_pressure.items() for v in vv}
    tempdf["Reason"]=tempdf.Reason.map(price_pressure_values).astype("category", categories=set(price_pressure_values.values()))
    #Each sheet covers a single day, so the first row's date identifies it
    date=tempdf.iloc[0]["TradeDate"].date()
    #Aggregating volumes by trade reason
    ppdf=tempdf.groupby("Reason").sum()
    #Resetting index to get labels
    ppdf.reset_index(inplace=True)
    #Share of each reason in the day's traded volume
    sizes=ppdf["TradeVolume"]/sum(ppdf["TradeVolume"])
    labels=ppdf["Reason"]
    numask=0
    numnan=0
    numbid=0
    nummatch=0
    #Map the encoded labels back to the reason they represent.
    #BUG FIX: "2" encodes BID and "3" encodes MATCH (see price_pressure above);
    #the original code read them back swapped, so the 'bid' and 'match'
    #columns/plot lines were exchanged.
    for i in range(len(labels)):
        if(labels[i]=="1"):
            numask=sizes[i]
        elif(labels[i]=="4"):
            numnan=sizes[i]
        elif(labels[i]=="2"):
            numbid=sizes[i]
        elif(labels[i]=="3"):
            nummatch=sizes[i]
    #Appending the day's figures to their corresponding lists
    daily_dataframe['Date'].append(date)
    daily_dataframe['ask'].append(numask)
    daily_dataframe['bid'].append(numbid)
    daily_dataframe['match'].append(nummatch)
    daily_dataframe['nan'].append(numnan)

#Collecting the daily figures into a frame, ordered by date
TLS_daily_df=pd.DataFrame.from_dict(daily_dataframe)
TLS_daily_df=TLS_daily_df.sort_values(by=['Date'])
#Writing the dataframe in the summary file
TLS_daily_df.to_excel(TLS_writer, sheet_name='Daily_Price_pressure')
#Plot the three (non-missing) reason shares as daily lines
plt.style.use('seaborn')
plt.ylabel('% share in daily trading')
plt.xlabel('Dates')
plt.title("% Shares of Reasons of TLS Aug 2019")
#Explicit labels so the legend is populated (it was empty before)
plt.plot(TLS_daily_df["Date"], TLS_daily_df["ask"], label="ask")
plt.plot(TLS_daily_df["Date"], TLS_daily_df["bid"], label="bid")
plt.plot(TLS_daily_df["Date"], TLS_daily_df["match"], label="match")
plt.legend(loc='upper right')
plt.show()

Daily Stats

In [14]:
import warnings
warnings.filterwarnings("ignore")
import os
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from datetime import datetime
import copy
#Reading the file and storing the sheets in a dictionary (one sheet per trading day)
xl_file = pd.ExcelFile("TLS  Course of Sales.xlsx")
dfs = {sheet_name: xl_file.parse(sheet_name) 
          for sheet_name in xl_file.sheet_names}

#Removing the empty sheets and their corresponding keys
tempdfs=copy.deepcopy(dfs)
for key in dfs.keys():
    if(len(dfs[key])==0):
        del tempdfs[key]
dfs=tempdfs

#Creating daily_dataframe to store the daily OHLC and VWAP measures
daily_dataframe={}
daily_dataframe['Date'] = []
daily_dataframe['Open'] = []
daily_dataframe['Close'] = []
daily_dataframe['Low'] = []
daily_dataframe['High'] = []
daily_dataframe['Vwap'] = []
from datetime import time
#Time windows bracketing the opening (A-B) and closing (A1-B1) auctions
A=time(hour =9, minute = 59, second = 45)
B=time(hour =10, minute = 10, second = 0)
A1=time(hour =16, minute = 0, second = 0)
B1=time(hour =16, minute = 12, second = 0)
#Iterating over the per-day sheets
for key in dfs.keys():
    tdf=dfs[key]
    out_df=copy.deepcopy(tdf)
    #Trades with reason=MATCH are auction candidates; the rest are continuous trading
    matching=out_df.loc[out_df["Reason"]=="MATCH"]
    non_auction=out_df.loc[out_df["Reason"]!="MATCH"]
    #MATCH trades inside the windows are tagged as opening/closing auction trades
    openingauction=matching.loc[(matching['TradeTime']>=A)&(matching['TradeTime']<=B)]
    closingauction=matching.loc[(matching['TradeTime']>=A1)&(matching['TradeTime']<=B1)]

    openingauction["CondCodes"]="OpeningAuction"
    closingauction["CondCodes"]="ClosingAuction"
    #Concatenating opening and closing auction dataframes to create total auction dataframe
    auction=pd.concat([openingauction,closingauction])
    #Concatenating auction and non auction dataframes to create total dataframe
    totaldf=pd.concat([auction,non_auction])
    #Extracting ASX trades into dataframe
    asxdf=totaldf.loc[totaldf["DataSource"]=="TM"]

    #Creating list of various tradetypes (each code also exists with an "XT" suffix)
    temp=["SX","SP","SA","S0","S1","S2","S3","P1","P2","PT"]
    trade_blocklist=["SX","SP","SA","S0","S1","S2","S3","P1","P2","PT"]
    for val in temp:
        trade_blocklist.append(val+str("XT"))

    # BUG FIX: this assignment was previously indented inside the loop above,
    # re-binding `temp` on every iteration. The output was unaffected (Python
    # iterates the originally bound list), but the code was misleading.
    temp=["L","LT","L1","L2","L3","L4","L5"]
    trade_otherlist=["L","LT","L1","L2","L3","L4","L5"]
    for val in temp:
        trade_otherlist.append(val+str("XT"))

    trade_optionslist=["EC","ECXT","EP","EPXT"]
    trade_misc=["OSXT","ET","ETXT"]
    trade_otherlist=trade_otherlist+trade_optionslist+trade_misc
    #Grouping the trade condcodes into a tradetype and create a column for it
    mapoftradetype = {"ASXOpenAuction": ["OpeningAuction"], "ASXCloseAuction": ["ClosingAuction"], "ASXNBBO": ["NX","NXXT"],"ASXCentrePt":["CX","CP","CXXT"],"ASXCentrept-Pref":["CE","CEXT"], "ASXOn-market lit": [np.nan,"XT","EQTM"],"ASXBlock":trade_blocklist,"ASXOthers":trade_otherlist}
    mappedtradetype = {v: k for k,vv in mapoftradetype.items() for v in vv}
    asxdf["Tradetype"]=asxdf.CondCodes.map(mappedtradetype).astype("category", categories=set(mappedtradetype.values()))
    #Extracting only the ChiX trades
    ChiXdf=out_df.loc[out_df["DataSource"]=="CXA"]
    #Grouping the trade condcodes into a tradetype and create a column for it
    m = {"CHIXblock": ["SP","SX","SXDE"], "CHIXother": ["PC","PO","ET","LT","MC"], "CHIXNBBO": ["NX","NXXT"],"CHIXon-market hidden":["HL","BR"], "CHIXon-market lit": [np.nan,"XT","S","B"]}
    m2 = {v: k for k,vv in m.items() for v in vv}
    ChiXdf["Tradetype"]=ChiXdf.CondCodes.map(m2).astype("category", categories=set(m2.values()))
    #Concatenating asx and chix dataframes to create total dataframe
    tempdf=pd.concat([asxdf,ChiXdf])
    #Keeping only the price-forming trade types for the daily OHLC/VWAP
    daily_df1=tempdf.loc[tempdf["Tradetype"]=="ASXOpenAuction"]
    daily_df2=tempdf.loc[tempdf["Tradetype"]=="ASXCloseAuction"]
    daily_df3=tempdf.loc[tempdf["Tradetype"]=="ASXOn-market lit"]
    daily_df4=tempdf.loc[tempdf["Tradetype"]=="ASXCentrePt"]
    daily_df5=tempdf.loc[tempdf["Tradetype"]=="ASXNBBO"]
    daily_df6=tempdf.loc[tempdf["Tradetype"]=="ASXCentrept-Pref"]
    daily_df7=tempdf.loc[tempdf["Tradetype"]=="CHIXon-market lit"]
    daily_df8=tempdf.loc[tempdf["Tradetype"]=="CHIXNBBO"]
    daily_df9=tempdf.loc[tempdf["Tradetype"]=="CHIXon-market hidden"]
    #Concatenating extracted dataframes to create total dataframe
    daily_df=pd.concat([daily_df1,daily_df2,daily_df3,daily_df4,daily_df5,daily_df6,daily_df7,daily_df8,daily_df9])
    nine_to_four_df=copy.deepcopy(daily_df)
    #Daily measures: first/last trade by time give open/close.
    #NOTE(review): prices appear to be in cents (divided by 100 to get dollars) — confirm.
    nine_to_four_df=nine_to_four_df.sort_values(by="TradeTime")
    openprice=nine_to_four_df.iloc[0]["TradePrice"]/100
    lowprice=min(nine_to_four_df["TradePrice"])/100
    highprice=max(nine_to_four_df["TradePrice"])/100
    closeprice=nine_to_four_df.iloc[len(nine_to_four_df)-1]["TradePrice"]/100
    #VWAP = total traded value / total traded volume
    vwapprice=(nine_to_four_df["TradeValue"].sum()/nine_to_four_df["TradeVolume"].sum())
    date=nine_to_four_df.iloc[0]["TradeDate"].date()
    #Appending the information to their corresponding lists
    daily_dataframe['Date'].append(date)
    daily_dataframe['Open'].append(openprice)
    daily_dataframe['Close'].append(closeprice)
    daily_dataframe['Low'].append(lowprice)
    daily_dataframe['High'].append(highprice)
    daily_dataframe['Vwap'].append(vwapprice)

#Saving into a dataframe
daily_df=pd.DataFrame.from_dict(daily_dataframe)
#Writing the dataframe in the summary file    
daily_df.to_excel(TLS_writer, sheet_name='Daily_stock_movement')
In [15]:
# Candlestick (OHLC) view of the daily price summary built above.
import plotly.graph_objects as go
layout = dict(
        title="Daily Stock Price-Movement",
        xaxis=go.layout.XAxis(title=go.layout.xaxis.Title( text="Dates")),
        yaxis=go.layout.YAxis(title=go.layout.yaxis.Title( text="Price $ - AUD Dollars"))
)
# One candle per trading day, fed from the OHLC columns of daily_df.
data = go.Candlestick(
    x=daily_df['Date'],
    open=daily_df['Open'],
    high=daily_df['High'],
    low=daily_df['Low'],
    close=daily_df['Close'],
)
figSignal = go.Figure(data=data, layout=layout)
figSignal.show()

The extra bar that appears below the OHLC graph is a range slider for interacting with it. You can increase or decrease the area of focus by clicking and dragging the leftmost and rightmost handles of the lower bar. This is useful for zooming in on specific regions of the data when the dataset is large.

In [16]:
fig = plt.figure() # Create matplotlib figure
ax = fig.add_subplot(111)
#Plotting the daily vwap (volume-weighted average price, computed above)
plt.plot(daily_df["Date"],daily_df["Vwap"])
plt.style.use('seaborn')
plt.ylabel('Prices in $AUD')
plt.xlabel('Dates')
plt.title("% VWAP of TLS Aug 2019")
plt.legend() 
Out[16]:
<matplotlib.legend.Legend at 0x2b37be48be0>

Broker-Market Share

In [17]:
import os
import pandas as pd
import numpy as np
import datetime
import copy
#Reading the file (all sheets) and stacking them into one frame
cdf = pd.read_excel("TLS  Course of Sales.xlsx", sheet_name=None)
out_df = pd.concat(cdf,ignore_index=True)
tempdf=copy.deepcopy(out_df)
#Broker names on each side of the trades
sellerids=tempdf["SellerName"]
buyerids=tempdf["BuyerName"]
#All the brokers involved in trading on either side.
#NOTE(review): Series.append is deprecated in modern pandas (use pd.concat);
#kept to match the pandas version this notebook targets.
totallist=buyerids.append(sellerids)
#Removing the repetitions by converting them into a set
totallist= list(set(totallist))
buyerids= list(set(buyerids))
sellerids= list(set(sellerids))

#Total volume counted once per side (each trade has a buyer and a seller)
totalshare=tempdf["TradeVolume"].sum()*2

#Total volume counted on one side only
halftotalshare=tempdf["TradeVolume"].sum()

share_dict_total = dict.fromkeys(totallist)
share_dict_sell_side = dict.fromkeys(sellerids)
share_dict_buy_side = dict.fromkeys(buyerids)

def _add_volume(share_map, broker, volume):
    """Add `volume` to `share_map[broker]`, initialising the entry on first sight.

    Entries start as None (dict.fromkeys above), so the first trade for a
    broker replaces None instead of adding to it.
    """
    if share_map[broker] is None:
        share_map[broker] = volume
    else:
        share_map[broker] = share_map[broker] + volume

# Accumulate traded volume per broker on the total, buy and sell sides.
# itertuples replaces the original's eight `.iloc[i][...]` lookups per row
# (a large constant-factor slowdown on big frames); `is None` replaces the
# non-idiomatic `== None` comparisons. Results are identical.
for row in tempdf.itertuples(index=False):
    _add_volume(share_dict_total, row.SellerName, row.TradeVolume)
    _add_volume(share_dict_total, row.BuyerName, row.TradeVolume)
    _add_volume(share_dict_buy_side, row.BuyerName, row.TradeVolume)
    _add_volume(share_dict_sell_side, row.SellerName, row.TradeVolume)
In [18]:
#setting the display to 2 digits after decimal (affects all later DataFrame output)
pd.options.display.float_format = '{:.2f}'.format

Broker_share_total

In [19]:
# BUG FIX: the original bound p1 directly to share_dict_total, so the in-place
# percentage conversion below mutated the raw-volume dict and made this cell
# non-idempotent (re-running divided the shares a second time). The buy-/sell-
# side cells already use deepcopy; this makes the total-side cell consistent.
p1=copy.deepcopy(share_dict_total)
#iterating over all keys in dictionary and calculating their %share
for key in p1.keys():
    p1[key]=100*(p1[key]/totalshare)
keylist=[]
vallist=[]
#Appending the relevant information into lists so that it can be saved into a data-frame
for key in p1.keys():
    keylist.append(key)
    vallist.append(p1[key])
#Saving into a dataframe
temp_df = pd.DataFrame(list(zip(keylist, vallist)))
#Renaming the columns
temp_df.columns = ['a', 'b']
#Sorting by share %
temp_df=temp_df.sort_values(by=['b'], ascending=False)
#Resetting index to get labels
temp_df=temp_df.reset_index()
temp_df.drop(['index'], axis=1,inplace=True)
#Extracting the top 10 shares
df2 = temp_df.sort_values('b', ascending=False)[:10]
#Summing up the rest of them into an "Others" bucket
s = temp_df.sort_values('b', ascending=False).b[10:].sum()
#Appending the other's information to the dataframe
df2.loc[len(df2)]=['Others', s]
#Renaming the columns
df2.columns = ['Company', 'Share in %']
temp_df.columns = ['Company', 'Share in %']
#Writing the dataframe in the summary file
temp_df.to_excel(TLS_writer, sheet_name='Broker_share_total')
df2
Out[19]:
Company Share in %
0 UBS 13.48
1 OpenMarkets 12.49
2 Macquarie Insto 8.29
3 Credit Suisse 7.84
4 Goldman Sachs 6.59
5 JPMorgan 6.52
6 Morgan Stanley 6.44
7 Citigroup 5.74
8 Merrill Lynch 5.28
9 Deutsche 4.57
10 Others 22.76

Broker_share_buy

In [20]:
# Buy-side broker market share: convert each broker's bought volume into a
# percentage of the total one-sided volume, then tabulate the top 10.
p1 = copy.deepcopy(share_dict_buy_side)
p1 = {broker: 100 * (volume / halftotalshare) for broker, volume in p1.items()}

# Flatten the dict into parallel lists for DataFrame construction.
keylist = list(p1.keys())
vallist = [p1[broker] for broker in keylist]

# Build, sort (largest share first) and re-index the share table.
temp_df = pd.DataFrame(list(zip(keylist, vallist)))
temp_df.columns = ['a', 'b']
temp_df = temp_df.sort_values(by=['b'], ascending=False).reset_index(drop=True)

# Top 10 brokers individually; everyone else collapsed into an "Others" row.
df2 = temp_df[:10].copy()
s = temp_df.b[10:].sum()
df2.loc[len(df2)] = ['Others', s]

# Human-readable column names for both tables.
df2.columns = ['Company', 'Share in %']
temp_df.columns = ['Company', 'Share in %']

# Persist the full table in the summary workbook and display the top-10 summary.
temp_df.to_excel(TLS_writer, sheet_name='Broker_share_buy')
df2
Out[20]:
Company Share in %
0 UBS 14.01
1 OpenMarkets 12.48
2 Macquarie Insto 8.72
3 Credit Suisse 8.69
4 Goldman Sachs 6.92
5 Morgan Stanley 6.83
6 JPMorgan 6.72
7 Citigroup 5.65
8 Deutsche 4.71
9 Merrill Lynch 4.34
10 Others 20.92

Broker_share_sell

In [21]:
#Sell-side broker market share (deepcopy keeps share_dict_sell_side intact
#so the cell can be re-run safely)
p1=copy.deepcopy(share_dict_sell_side)
#iterating over all keys in dictionary and calculating their %share
#of the total one-sided volume

for key in p1.keys():
    p1[key]=100*(p1[key]/halftotalshare)
keylist=[]
vallist=[]
#Appending the relevant information into lists so that it can be saved into a data-frame

for key in p1.keys():
    keylist.append(key)
    vallist.append(p1[key])
    
#Saving into a dataframe
temp_df = pd.DataFrame(list(zip(keylist, vallist)))
#Renaming the columns
temp_df.columns = ['a', 'b']
#Sorting by share %
temp_df=temp_df.sort_values(by=['b'], ascending=False)
#Resetting index to get labels
temp_df=temp_df.reset_index()
temp_df.drop(['index'], axis=1,inplace=True)
#Extracting the top 10 shares
df2 = temp_df.sort_values('b', ascending=False)[:10]
#Summing up the rest of them into an "Others" bucket
s = temp_df.sort_values('b', ascending=False).b[10:].sum()
#Appending the other's information to the dataframe
df2.loc[len(df2)]=['Others', s]
#Renaming the columns
df2.columns = ['Company', 'Share in %']
temp_df.columns = ['Company', 'Share in %']

#Writing the dataframe in the summary file
temp_df.to_excel(TLS_writer, sheet_name='Broker_share_sell')
df2
Out[21]:
Company Share in %
0 UBS 12.95
1 OpenMarkets 12.51
2 Macquarie Insto 7.87
3 Credit Suisse 6.99
4 JPMorgan 6.31
5 Goldman Sachs 6.27
6 Merrill Lynch 6.22
7 Morgan Stanley 6.04
8 Citigroup 5.82
9 Deutsche 4.42
10 Others 24.60

Broker Market Share Block Trades

In [22]:
import os
import pandas as pd
import numpy as np
import datetime
import copy
#Reading the file
cdf = pd.read_excel("TLS  Course of Sales.xlsx", sheet_name=None)
#Concatenating all the sheets to a single sheet
out_df = pd.concat(cdf,ignore_index=True)
#Extracting Chi-X trades
ChiXdf=out_df.loc[out_df["DataSource"]=="CXA"]
#Grouping the trade condcodes into a tradetype and create a column for it
m = {"CHIXblock": ["SP","SX","SXDE"], "CHIXother": ["PC","PO","ET","LT","MC"], "CHIXNBBO": ["NX","NXXT"],"CHIXon-market hidden":["HL","BR"], "CHIXon-market lit": [np.nan,"XT","S","B"]}
m2 = {v: k for k,vv in m.items() for v in vv}
ChiXdf["Tradetype"]=ChiXdf.CondCodes.map(m2).astype("category", categories=set(m2.values()))

import os
import pandas as pd
import numpy as np
import datetime
import copy
import matplotlib.pyplot as plt
#Reading the file
cdf = pd.read_excel("TLS  Course of Sales.xlsx", sheet_name=None)
#Concatenating all the sheets to a single sheet
out_df = pd.concat(cdf,ignore_index=True)
#Extracting only the trades with reason=MATCH
matching=out_df.loc[out_df["Reason"]=="MATCH"]
#Extracting only the trades with reason!=MATCH
non_auction=out_df.loc[out_df["Reason"]!="MATCH"]

from datetime import time
A=time(hour =9, minute = 59, second = 45)
B=time(hour =10, minute = 10, second = 0)
#Extracting opening auction trades
openingauction=matching.loc[(matching['TradeTime']>=A)&(matching['TradeTime']<=B)]

from datetime import time
A=time(hour =16, minute = 0, second = 0)
B=time(hour =16, minute = 12, second = 0)
#Extracting closing auction trades
closingauction=matching.loc[(matching['TradeTime']>=A)&(matching['TradeTime']<=B)]

openingauction["CondCodes"]="OpeningAuction"
closingauction["CondCodes"]="ClosingAuction"

#Concatenating opening and closing auction dataframes to create total auction dataframe
auction=pd.concat([openingauction,closingauction])
#Concatenating auction and non auction dataframes to create total dataframe
totaldf=pd.concat([auction,non_auction])
#Extracting ASX trades into dataframe
asxdf=totaldf.loc[totaldf["DataSource"]=="TM"]

#Creating list of various tradetypes
temp=["SX","SP","SA","S0","S1","S2","S3","P1","P2","PT"]
trade_blocklist=["SX","SP","SA","S0","S1","S2","S3","P1","P2","PT"]
for val in temp:
    trade_blocklist.append(val+str("XT"))

    temp=["L","LT","L1","L2","L3","L4","L5"]
trade_otherlist=["L","LT","L1","L2","L3","L4","L5"]
for val in temp:
    trade_otherlist.append(val+str("XT"))

trade_optionslist=["EC","ECXT","EP","EPXT"]
trade_misc=["OSXT","ET","ETXT"]
trade_otherlist=trade_otherlist+trade_optionslist+trade_misc

mapoftradetype = {"ASXOpenAuction": ["OpeningAuction"], "ASXCloseAuction": ["ClosingAuction"], "ASXNBBO": ["NX","NXXT"],"ASXCentrePt":["CX","CP","CXXT"],"ASXCentrept-Pref":["CE","CEXT"], "ASXOn-market lit": [np.nan,"XT","EQTM"],"ASXBlock":trade_blocklist,"ASXOthers":trade_otherlist}

mappedtradetype = {v: k for k,vv in mapoftradetype.items() for v in vv}

asxdf["Tradetype"]=asxdf.CondCodes.map(mappedtradetype).astype("category", categories=set(mappedtradetype.values()))


#Concatenating asx and chix dataframes to create the full trade dataframe
tempdf=pd.concat([asxdf,ChiXdf])

#Extracting only block trades
chiblock=tempdf.loc[tempdf["Tradetype"]=="CHIXblock" ]
asxblock=tempdf.loc[tempdf["Tradetype"]=="ASXBlock" ]
#Concatenating asx and chix block-dataframes to create total block dataframe
totalblock=pd.concat([asxblock,chiblock])


tempdf=copy.deepcopy(totalblock)
#creating list of sellers
sellerids=tempdf["SellerName"]
#creating list of buyers
buyerids=tempdf["BuyerName"]
#All the brokers involved in trading
#(pd.concat replaces Series.append, which is deprecated/removed in newer pandas)
totallist=pd.concat([buyerids,sellerids])
#Removing the repetitions by converting them into a set
totallist= list(set(totallist))
buyerids= list(set(buyerids))
sellerids= list(set(sellerids))

#Each trade contributes its volume to both counterparties,
#so the denominator is twice the total traded volume
totalshare=tempdf["TradeVolume"].sum()*2

share_dict_total = dict.fromkeys(totallist)

for i in range(len(tempdf)):
    #hoist the row lookup: repeated .iloc[i] calls are expensive
    row = tempdf.iloc[i]
    #Add the trade volume to the running totals of both the seller and the
    #buyer; a None entry means the broker has not been seen yet
    for name in (row['SellerName'], row['BuyerName']):
        if share_dict_total[name] is None:
            share_dict_total[name] = row['TradeVolume']
        else:
            share_dict_total[name] = share_dict_total[name] + row['TradeVolume']

#iterating over all keys in dictionary and calculating their %share
p1=copy.deepcopy(share_dict_total)
for key in p1.keys():
    p1[key]=100*(p1[key]/totalshare)
#Collecting the broker names and their shares for the result dataframe
keylist=list(p1.keys())
vallist=list(p1.values())

#Saving into a dataframe
temp_df = pd.DataFrame(list(zip(keylist, vallist)))
#Renaming the columns
temp_df.columns = ['a', 'b']
#Sorting by share %
temp_df=temp_df.sort_values(by=['b'], ascending=False)
#Resetting index to get labels
temp_df=temp_df.reset_index(drop=True)
#Extracting the top 10 shares
df2 = temp_df.sort_values('b', ascending=False)[:10]
#Summing up the rest of them
s = temp_df.sort_values('b', ascending=False).b[10:].sum()
#Appending the aggregated "Others" row to the dataframe
df2.loc[len(df2)]=['Others', s]
#Renaming the columns
df2.columns = ['Company', 'Share in %']
temp_df.columns = ['Company', 'Share in %']

#Writing the dataframe in the summary file
temp_df.to_excel(TLS_writer, sheet_name='Block_Broker_share_total')
df2
Out[22]:
Company Share in %
0 Macquarie Insto 36.00
1 Goldman Sachs 19.93
2 JPMorgan 13.79
3 Credit Suisse 12.85
4 Citigroup 7.49
5 Merrill Lynch 6.84
6 Deutsche 2.40
7 UBS 0.42
8 Shaw 0.29
9 Others 0.00

NBBO broker share

In [23]:
#NOTE: this cell duplicates the block-broker-share cell above with a different
#trade-type filter — a shared helper function would remove the duplication.
tempdf=pd.concat([asxdf,ChiXdf])
#Extracting only NBBO trades
chiblock=tempdf.loc[tempdf["Tradetype"]=="CHIXNBBO"]
asxblock=tempdf.loc[tempdf["Tradetype"]=="ASXNBBO"]
#Concatenating asx and chix nbbo-dataframes to create total nbbo block
totalblock=pd.concat([asxblock,chiblock])


tempdf=copy.deepcopy(totalblock)
#creating list of sellers
sellerids=tempdf["SellerName"]
#creating list of buyers
buyerids=tempdf["BuyerName"]
#All the brokers involved in trading
#(pd.concat replaces Series.append, which is deprecated/removed in newer pandas)
totallist=pd.concat([buyerids,sellerids])
#Removing the repetitions by converting them into a set
totallist= list(set(totallist))
buyerids= list(set(buyerids))
sellerids= list(set(sellerids))

#Each trade contributes its volume to both counterparties,
#so the denominator is twice the total traded volume
totalshare=tempdf["TradeVolume"].sum()*2

share_dict_total = dict.fromkeys(totallist)

for i in range(len(tempdf)):
    #hoist the row lookup: repeated .iloc[i] calls are expensive
    row = tempdf.iloc[i]
    #Add the trade volume to the running totals of both the seller and the
    #buyer; a None entry means the broker has not been seen yet
    for name in (row['SellerName'], row['BuyerName']):
        if share_dict_total[name] is None:
            share_dict_total[name] = row['TradeVolume']
        else:
            share_dict_total[name] = share_dict_total[name] + row['TradeVolume']

p1=copy.deepcopy(share_dict_total)
#iterating over all keys in dictionary and calculating their %share
for key in p1.keys():
    p1[key]=100*(p1[key]/totalshare)
#Collecting the broker names and their shares for the result dataframe
keylist=list(p1.keys())
vallist=list(p1.values())
#Saving into a dataframe
temp_df = pd.DataFrame(list(zip(keylist, vallist)))
#Renaming the columns
temp_df.columns = ['a', 'b']
#Sorting by share %
temp_df=temp_df.sort_values(by=['b'], ascending=False)
#Resetting index to get labels
temp_df=temp_df.reset_index(drop=True)
#Extracting the top 10 shares
df2 = temp_df.sort_values('b', ascending=False)[:10]
#Summing up the rest of them
s = temp_df.sort_values('b', ascending=False).b[10:].sum()
#Appending the aggregated "Others" row to the dataframe
df2.loc[len(df2)]=['Others', s]
#Renaming the columns
df2.columns = ['Company', 'Share in %']
temp_df.columns = ['Company', 'Share in %']

#Writing the dataframe in the summary file
temp_df.to_excel(TLS_writer, sheet_name='NBBO_Broker_share_total')
df2
Out[23]:
Company Share in %
0 Credit Suisse 40.91
1 JPMorgan 17.67
2 Morgan Stanley 12.34
3 CLSA 10.56
4 Goldman Sachs 8.70
5 Macquarie Insto 3.40
6 Merrill Lynch 2.99
7 UBS 1.75
8 Deutsche 0.60
9 InstinetNomura 0.57
10 Others 0.51

Distribution of Trade-Sizes

In [24]:
#Reading the file and storing the sheets (one per trading day) in a dictionary
xl_file = pd.ExcelFile("TLS  Course of Sales.xlsx")
dfs = {sheet_name: xl_file.parse(sheet_name) 
          for sheet_name in xl_file.sheet_names}
#Removing the empty sheets and their corresponding keys
#(deletions go to a deep copy so the dict being iterated is not modified)
tempdfs=copy.deepcopy(dfs)
for key in dfs.keys():
    if(len(dfs[key])==0):
        del tempdfs[key]
dfs=tempdfs
finaldf=pd.DataFrame()
#Iterating over all the keys
for key in dfs.keys():
    tdf=dfs[key]
    tempdf=copy.deepcopy(tdf)
    df=copy.deepcopy(tdf)
    #Removing the rows corresponding to negative volume and the row after it
    for i in range(len(df)):
        if(df.loc[i]['TradeVolume']<0):
            #Guard both drops: with consecutive negative-volume rows (or a
            #negative row at the very end of the sheet) a label may already
            #have been dropped, and an unguarded drop raises KeyError
            if i in tempdf.index:
                tempdf.drop(index=i,inplace=True)
            if i+1 in tempdf.index:
                tempdf.drop(index=i+1,inplace=True)
    #if the length of finaldf is 0 then initalise the finaldf with the current dataframe
    #else finaldf is the concatenation of the existing finaldf and the current dataframe
    if(len(finaldf)==0):
        finaldf=tempdf
    else:
        finaldf=pd.concat([finaldf,tempdf])
In [25]:
#setting the pandas display format to 2 digits after the decimal point
#for all floats (affects the describe() outputs below)
pd.options.display.float_format = '{:.2f}'.format
In [26]:
#Rebuild the trade-type classification on the cleaned (cancellation-free) data
out_df=copy.deepcopy(finaldf)
#Extracting only the ChiX trades
ChiXdf=out_df.loc[out_df["DataSource"]=="CXA"]
#Grouping the trade condcodes into a tradetype and create a column for it
m = {"CHIXblock": ["SP","SX","SXDE"], "CHIXother": ["PC","PO","ET","LT","MC"], "CHIXNBBO": ["NX","NXXT"],"CHIXon-market hidden":["HL","BR"], "CHIXon-market lit": [np.nan,"XT","S","B"]}
#Invert the mapping: condition code -> trade type
m2 = {v: k for k,vv in m.items() for v in vv}
#pd.api.types.CategoricalDtype replaces astype("category", categories=...),
#which was removed in pandas 0.25
ChiXdf["Tradetype"]=ChiXdf.CondCodes.map(m2).astype(pd.api.types.CategoricalDtype(set(m2.values())))

import os
import pandas as pd
import numpy as np
import datetime
import copy
import matplotlib.pyplot as plt 
#Extracting only the trades with reason=MATCH (auction matches)
matching=out_df.loc[out_df["Reason"]=="MATCH"]
#Extracting only the trades with reason!=MATCH
non_auction=out_df.loc[out_df["Reason"]!="MATCH"]

from datetime import time
#Opening auction window: 09:59:45 - 10:10:00
A=time(hour =9, minute = 59, second = 45)
B=time(hour =10, minute = 10, second = 0)
#Extracting opening auction trades
openingauction=matching.loc[(matching['TradeTime']>=A)&(matching['TradeTime']<=B)]

from datetime import time
#Closing auction window: 16:00:00 - 16:12:00
A=time(hour =16, minute = 0, second = 0)
B=time(hour =16, minute = 12, second = 0)
#Extracting closing auction trades
closingauction=matching.loc[(matching['TradeTime']>=A)&(matching['TradeTime']<=B)]

openingauction["CondCodes"]="OpeningAuction"
closingauction["CondCodes"]="ClosingAuction"
#Concatenating opening and closing auction dataframes to create total auction dataframe
auction=pd.concat([openingauction,closingauction])
#Concatenating auction and non auction dataframes to create total dataframe
totaldf=pd.concat([auction,non_auction])
#Extracting only asxtrades
asxdf=totaldf.loc[totaldf["DataSource"]=="TM"]
#Block-trade condition codes plus their crossed ("XT") variants
base_block_codes=["SX","SP","SA","S0","S1","S2","S3","P1","P2","PT"]
trade_blocklist=base_block_codes+[code+"XT" for code in base_block_codes]

#"Other" trade condition codes (late trades etc.) plus crossed variants
base_other_codes=["L","LT","L1","L2","L3","L4","L5"]
trade_otherlist=base_other_codes+[code+"XT" for code in base_other_codes]

trade_optionslist=["EC","ECXT","EP","EPXT"]
trade_misc=["OSXT","ET","ETXT"]
trade_otherlist=trade_otherlist+trade_optionslist+trade_misc
#Grouping the trade condcodes into a tradetype and create a column for it
mapoftradetype = {"ASXOpenAuction": ["OpeningAuction"], "ASXCloseAuction": ["ClosingAuction"], "ASXNBBO": ["NX","NXXT"],"ASXCentrePt":["CX","CP","CXXT"],"ASXCentrept-Pref":["CE","CEXT"], "ASXOn-market lit": [np.nan,"XT","EQTM"],"ASXBlock":trade_blocklist,"ASXOthers":trade_otherlist}
mappedtradetype = {v: k for k,vv in mapoftradetype.items() for v in vv}
asxdf["Tradetype"]=asxdf.CondCodes.map(mappedtradetype).astype(pd.api.types.CategoricalDtype(set(mappedtradetype.values())))
#Concatenating chixdf and asxdf dataframes to create total dataframe
tempdf=pd.concat([asxdf,ChiXdf])

NOTE:

Count here refers to the number of observations in a particular category

25, 50 and 75 are the percentile values

Block-Trades

In [27]:
#Select block trades of both CHIX and ASX and concatenating them to create a total block trade dataframe
chiblock=tempdf.loc[tempdf["Tradetype"]=="CHIXblock" ]
asxblock=tempdf.loc[tempdf["Tradetype"]=="ASXBlock" ]
totalblock=pd.concat([asxblock,chiblock])
#Calculating the required stats
totalblock["TradeValue"].describe()
Out[27]:
count         80.00
mean     5088170.30
std      7503000.32
min        20032.51
25%      1493499.33
50%      2167676.01
75%      5804787.14
max     49879248.00
Name: TradeValue, dtype: float64

ASX Trades

In [28]:
#Calculating the required stats (count/mean/std/min/quartiles/max)
#of the trade dollar values for all ASX trades
asxdf["TradeValue"].describe()
Out[28]:
count     138898.00
mean       18031.63
std       163310.92
min            3.62
25%          501.15
50%         2689.75
75%        11610.00
max     31659000.00
Name: TradeValue, dtype: float64

CHI-X

In [29]:
#Calculating the required stats (count/mean/std/min/quartiles/max)
#of the trade dollar values for all CHI-X trades
ChiXdf["TradeValue"].describe()
Out[29]:
count      70420.00
mean       11888.09
std       232295.88
min            3.62
25%          281.88
50%         1927.69
75%         8703.35
max     49879248.00
Name: TradeValue, dtype: float64

CenterPt

In [30]:
#ASX CentrePoint trades (preferenced and regular), combined into one dataframe
cppdf = tempdf[tempdf["Tradetype"] == "ASXCentrept-Pref"]
cpdf = tempdf[tempdf["Tradetype"] == "ASXCentrePt"]
totalcpblock = pd.concat([cppdf, cpdf])
#Distribution statistics of the CentrePoint trade dollar values
totalcpblock["TradeValue"].describe()
Out[30]:
count     38539.00
mean       8087.52
std       28876.40
min           3.62
25%         258.92
50%        1641.51
75%        5291.28
max     1278520.57
Name: TradeValue, dtype: float64

Crossing

In [31]:
#NBBO (crossing) trades from both CHI-X and ASX, combined into one dataframe
chinbbodf = tempdf[tempdf["Tradetype"] == "CHIXNBBO"]
asxnbbodf = tempdf[tempdf["Tradetype"] == "ASXNBBO"]
#NOTE: the variable name is reused from the CentrePt cell above
totalcpblock = pd.concat([chinbbodf, asxnbbodf])
#Distribution statistics of the crossing trade dollar values
totalcpblock["TradeValue"].describe()
Out[31]:
count     20080.00
mean      10544.59
std      137752.66
min           3.62
25%         209.69
50%         969.87
75%        4678.41
max     9812500.00
Name: TradeValue, dtype: float64

Herfindahl Index

In [32]:
import os
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from datetime import datetime
import copy
from datetime import time
#Reading the file and storing the sheets in a dictionary
#(one sheet per trading day)
xl_file = pd.ExcelFile("TLS  Course of Sales.xlsx")
dfs = {sheet_name: xl_file.parse(sheet_name) 
          for sheet_name in xl_file.sheet_names}
#Removing the empty sheets and their corresponding keys
#(deletions are applied to a deep copy so the dict being iterated stays intact)
tempdfs=copy.deepcopy(dfs)
for key in dfs.keys():
    if(len(dfs[key])==0):
        del tempdfs[key]

dfs=tempdfs
#NOTE(review): groupeddf appears unused in the rest of the notebook — verify
groupeddf=dfs
#Creating daily_dataframe to store the daily calculated measures.
#Each key maps to a list that receives one value per trading day in the
#loop below; "Public" columns cover public venues only, "Total" columns
#additionally include broker crossing systems.
_daily_columns = [
    'Date',
    'Public-ASXOnmarket-lit', 'Public-ASXCentre', 'Public-CHIX', 'Public-HHI',
    'Total-ASXOnmarket-lit', 'Total-ASXCentre', 'Total-CHIX',
    'Total-CLSA', 'Total-GoldmanSachs', 'Total-UBS', 'Total-Citigroup',
    'Total-JPMorgan', 'Total-CreditSuisse', 'Total-MerrillLynch',
    'Total-Deutsche', 'Total-MorganStanley', 'Total-MacquarieInsto',
    'Total-VirtuITG', 'Total-InstinetNomura', 'Total-HHI',
]
daily_dataframe = {column: [] for column in _daily_columns}
#Iterating over all the keys
#For each trading day (one sheet per key): rebuild the trade-type
#classification, split volume across venues/crossing systems, and compute
#the Herfindahl-Hirschman index (HHI) of market-share concentration.
for key in dfs.keys():
    tdf=dfs[key]
    out_df=copy.deepcopy(tdf)
    #Extracting only the ChiX trades
    ChiXdf=out_df.loc[out_df["DataSource"]=="CXA"]
    #Grouping the trade condcodes into a tradetype and create a column for it
    m = {"CHIXblock": ["SP","SX","SXDE"], "CHIXother": ["PC","PO","ET","LT","MC"], "CHIXNBBO": ["NX","NXXT"],"CHIXon-market hidden":["HL","BR"], "CHIXon-market lit": [np.nan,"XT","S","B"]}
    m2 = {v: k for k,vv in m.items() for v in vv}
    #NOTE(review): astype("category", categories=...) was removed in pandas
    #0.25 — newer pandas needs pd.api.types.CategoricalDtype instead
    ChiXdf["Tradetype"]=ChiXdf.CondCodes.map(m2).astype("category", categories=set(m2.values()))
    #Extracting only the trades with reason=MATCH
    matching=out_df.loc[out_df["Reason"]=="MATCH"]
    #Extracting only the trades with reason!=MATCH
    non_auction=out_df.loc[out_df["Reason"]!="MATCH"]

    #Opening auction window: 09:59:45 - 10:10:00
    A=time(hour =9, minute = 59, second = 45)
    B=time(hour =10, minute = 10, second = 0)
    #Extracting opening auction trades
    openingauction=matching.loc[(matching['TradeTime']>=A)&(matching['TradeTime']<=B)]

    #Closing auction window: 16:00:00 - 16:12:00
    A=time(hour =16, minute = 0, second = 0)
    B=time(hour =16, minute = 12, second = 0)
    #Extracting closing auction trades
    closingauction=matching.loc[(matching['TradeTime']>=A)&(matching['TradeTime']<=B)]

    openingauction["CondCodes"]="OpeningAuction"
    closingauction["CondCodes"]="ClosingAuction"
    #Concatenating opening and closing auction dataframes to create total auction dataframe
    auction=pd.concat([openingauction,closingauction])
    #Concatenating auction and non auction dataframes to create total dataframe
    totaldf=pd.concat([auction,non_auction])
    #Extracting only asxtrades
    asxdf=totaldf.loc[totaldf["DataSource"]=="TM"]
    #Creating list of various tradetypes
    temp=["SX","SP","SA","S0","S1","S2","S3","P1","P2","PT"]
    trade_blocklist=["SX","SP","SA","S0","S1","S2","S3","P1","P2","PT"]
    for val in temp:
        trade_blocklist.append(val+str("XT"))

        #NOTE(review): this reassignment is misindented into the loop body;
        #the result is unchanged (the loop iterates the original list), but
        #the intended placement is after the loop
        temp=["L","LT","L1","L2","L3","L4","L5"]
    trade_otherlist=["L","LT","L1","L2","L3","L4","L5"]
    for val in temp:
        trade_otherlist.append(val+str("XT"))

    trade_optionslist=["EC","ECXT","EP","EPXT"]
    trade_misc=["OSXT","ET","ETXT"]
    trade_otherlist=trade_otherlist+trade_optionslist+trade_misc
    #Grouping the trade condcodes into a tradetype and create a column for it
    mapoftradetype = {"ASXOpenAuction": ["OpeningAuction"], "ASXCloseAuction": ["ClosingAuction"], "ASXNBBO": ["NX","NXXT"],"ASXCentrePt":["CX","CP","CXXT"],"ASXCentrept-Pref":["CE","CEXT"], "ASXOn-market lit": [np.nan,"XT","EQTM"],"ASXBlock":trade_blocklist,"ASXOthers":trade_otherlist}
    mappedtradetype = {v: k for k,vv in mapoftradetype.items() for v in vv}
    asxdf["Tradetype"]=asxdf.CondCodes.map(mappedtradetype).astype("category", categories=set(mappedtradetype.values()))
    #Concatenating chixdf and asxdf dataframes to create total dataframe
    tempdf=pd.concat([asxdf,ChiXdf])
    #Extract relevant tradetypes
    daily_df1=tempdf.loc[tempdf["Tradetype"]=="ASXOpenAuction"]
    daily_df2=tempdf.loc[tempdf["Tradetype"]=="ASXCloseAuction"]
    daily_df3=tempdf.loc[tempdf["Tradetype"]=="ASXOn-market lit"]
    daily_df4=tempdf.loc[tempdf["Tradetype"]=="ASXCentrePt"]
    daily_df5=tempdf.loc[tempdf["Tradetype"]=="ASXNBBO"]
    daily_df6=tempdf.loc[tempdf["Tradetype"]=="ASXCentrept-Pref"]
    daily_df7=tempdf.loc[tempdf["Tradetype"]=="CHIXon-market lit"]
    daily_df8=tempdf.loc[tempdf["Tradetype"]=="CHIXNBBO"]
    daily_df9=tempdf.loc[tempdf["Tradetype"]=="CHIXon-market hidden"]
    #Concatenating the extracted dataframes to generate a total dataframe
    #(public venues only — auctions and NBBO are deliberately excluded here)
    daily_df=pd.concat([daily_df3,daily_df4,daily_df6,daily_df7,daily_df9])
    #Concatenating the extracted NBBO dataframes to generate a total NBBO dataframe
    nbbo_df=pd.concat([daily_df8,daily_df5])
    #Grouping the trade tradetypes into coarse market categories:
    #type1 = ASX lit, type2 = ASX CentrePoint, type3 = CHI-X
    mapoftradetype = {"type1": ["ASXOn-market lit"], "type2": ["ASXCentrePt","ASXCentrept-Pref"], "type3": ["CHIXon-market lit","CHIXon-market hidden"]}
    mappedtradetype={v: k for k,vv in mapoftradetype.items() for v in vv}
    #Grouping the trade condcodes into a tradetype and create a column for it
    daily_df["Tradetype2"]=daily_df.Tradetype.map(mappedtradetype).astype("category", categories=set(mappedtradetype.values()))
    #Creating the tradetype column for nbbo
    #NOTE(review): NBBO trades are attributed to a broker via BuyerName —
    #presumably buyer and seller broker coincide for these crossings; verify
    nbbo_df["Tradetype2"]=nbbo_df["BuyerName"]
    #Removing the brokers which don't have the crossing platforms
    nbbo_df=nbbo_df[nbbo_df["Tradetype2"]!='Evans & Partner']
    #Extracting  relevant information
    nine_to_four_df=copy.deepcopy(daily_df)
    date=nine_to_four_df.iloc[0]["TradeDate"].date()
    #Calculating total volume traded (each trade counted once per side here)
    halftotalshare=nine_to_four_df["TradeVolume"].sum()
    temp_final_df=copy.deepcopy(nine_to_four_df)
    #Calculating the volume shares in % of total volume traded
    temp_final_df["TradeVolume"]=temp_final_df["TradeVolume"]/halftotalshare
    #Grouping based on tradetypes
    temp_final_df=temp_final_df.groupby("Tradetype2").sum()
    #Resetting index to get labels
    temp_final_df.reset_index(inplace=True)
    publichhiindex=0
    public_asx_On_Market_lit=0
    public_asx_Centre=0
    public_chi_all=0
    total_asx_On_Market_lit=0
    total_asx_Centre=0
    total_chi_all=0
    total_clsa=0
    total_goldmansachs=0
    total_ubs=0
    total_citigroup=0
    total_jpmorgan=0
    total_creditsuisse=0
    total_merrilllynch=0
    total_deutsche=0
    total_morganstanley=0
    total_macquarie=0
    total_virtu_itg=0
    total_instinetnoumra=0
    #Public HHI = sum of squared venue volume shares
    for i in range(len(temp_final_df)):
        publichhiindex=publichhiindex+temp_final_df.iloc[i]["TradeVolume"]**2
        if(temp_final_df.iloc[i]["Tradetype2"]=="type1"):
            public_asx_On_Market_lit=temp_final_df.iloc[i]["TradeVolume"]
        elif(temp_final_df.iloc[i]["Tradetype2"]=="type2"):
            public_asx_Centre=temp_final_df.iloc[i]["TradeVolume"]
        elif(temp_final_df.iloc[i]["Tradetype2"]=="type3"):
            public_chi_all=temp_final_df.iloc[i]["TradeVolume"]
    nine_to_four_df=copy.deepcopy(daily_df)
    #Concatenating the extracted dataframe and nbbo dataframe to generate a total dataframe
    nine_to_four_df=pd.concat([nine_to_four_df,nbbo_df])
    #Calculating total volume traded
    halftotalshare=nine_to_four_df["TradeVolume"].sum()
    temp_final_df=copy.deepcopy(nine_to_four_df)
    #Calculating the volume shares in % of total volume traded
    temp_final_df["TradeVolume"]=temp_final_df["TradeVolume"]/halftotalshare
    #Grouping based on tradetypes
    temp_final_df=temp_final_df.groupby("Tradetype2").sum()
    #Resetting index to get labels
    temp_final_df.reset_index(inplace=True)
    totalhhiindex=0
    #Total HHI = sum of squared shares over venues AND broker crossing systems
    for i in range(len(temp_final_df)):
        totalhhiindex=totalhhiindex+temp_final_df.iloc[i]["TradeVolume"]**2
        if(temp_final_df.iloc[i]["Tradetype2"]=="type1"):
            total_asx_On_Market_lit=temp_final_df.iloc[i]["TradeVolume"]
        elif(temp_final_df.iloc[i]["Tradetype2"]=="type2"):
            total_asx_Centre=temp_final_df.iloc[i]["TradeVolume"]
        elif(temp_final_df.iloc[i]["Tradetype2"]=="type3"):
            total_chi_all=temp_final_df.iloc[i]["TradeVolume"]
        elif(temp_final_df.iloc[i]["Tradetype2"]=="CLSA"):
            total_clsa=temp_final_df.iloc[i]["TradeVolume"]
        elif(temp_final_df.iloc[i]["Tradetype2"]=="Goldman Sachs"):
            total_goldmansachs=temp_final_df.iloc[i]["TradeVolume"]
        elif(temp_final_df.iloc[i]["Tradetype2"]=="UBS"):
            total_ubs=temp_final_df.iloc[i]["TradeVolume"]
        elif(temp_final_df.iloc[i]["Tradetype2"]=="Citigroup"):
            total_citigroup=temp_final_df.iloc[i]["TradeVolume"]
        elif(temp_final_df.iloc[i]["Tradetype2"]=="JPMorgan"):
            total_jpmorgan=temp_final_df.iloc[i]["TradeVolume"]
        elif(temp_final_df.iloc[i]["Tradetype2"]=="Credit Suisse"):
            total_creditsuisse=temp_final_df.iloc[i]["TradeVolume"]
        elif(temp_final_df.iloc[i]["Tradetype2"]=="Merrill Lynch"):
            total_merrilllynch=temp_final_df.iloc[i]["TradeVolume"]
        elif(temp_final_df.iloc[i]["Tradetype2"]=="Deutsche"):
            total_deutsche=temp_final_df.iloc[i]["TradeVolume"]
        elif(temp_final_df.iloc[i]["Tradetype2"]=="Morgan Stanley"):
            total_morganstanley=temp_final_df.iloc[i]["TradeVolume"]
        elif(temp_final_df.iloc[i]["Tradetype2"]=="Macquarie Insto"):
            total_macquarie=temp_final_df.iloc[i]["TradeVolume"]
        elif(temp_final_df.iloc[i]["Tradetype2"]=="Virtu ITG Aust."):
            total_virtu_itg=temp_final_df.iloc[i]["TradeVolume"]
        elif(temp_final_df.iloc[i]["Tradetype2"]=="InstinetNomura"):
            total_instinetnoumra=temp_final_df.iloc[i]["TradeVolume"]
    #Appending the information to their corresponding lists
    daily_dataframe['Date'].append(date)
    daily_dataframe['Public-ASXOnmarket-lit'].append(public_asx_On_Market_lit)
    daily_dataframe['Public-ASXCentre'].append(public_asx_Centre)
    daily_dataframe['Public-CHIX'].append(public_chi_all)
    daily_dataframe['Public-HHI'].append(publichhiindex)
    daily_dataframe['Total-ASXOnmarket-lit'].append(total_asx_On_Market_lit)
    daily_dataframe['Total-ASXCentre'].append(total_asx_Centre)
    daily_dataframe['Total-CHIX'].append(total_chi_all)
    daily_dataframe['Total-CLSA'].append(total_clsa)
    daily_dataframe['Total-GoldmanSachs'].append(total_goldmansachs)
    daily_dataframe['Total-UBS'].append(total_ubs)
    daily_dataframe['Total-Citigroup'].append(total_citigroup)
    daily_dataframe['Total-JPMorgan'].append(total_jpmorgan)
    daily_dataframe['Total-CreditSuisse'].append(total_creditsuisse)
    daily_dataframe['Total-MerrillLynch'].append(total_merrilllynch)
    daily_dataframe['Total-Deutsche'].append(total_deutsche)
    daily_dataframe['Total-MorganStanley'].append(total_morganstanley)
    daily_dataframe['Total-MacquarieInsto'].append(total_macquarie)
    daily_dataframe['Total-VirtuITG'].append(total_virtu_itg)
    daily_dataframe['Total-InstinetNomura'].append(total_instinetnoumra)
    daily_dataframe['Total-HHI'].append(totalhhiindex)
In [33]:
#Saving into a dataframe
TLS_daily_hhi_df2=pd.DataFrame.from_dict(daily_dataframe)
#Sort according to date
TLS_daily_hhi_df2=TLS_daily_hhi_df2.sort_values("Date")
#Writing the dataframe in the summary file
TLS_daily_hhi_df2.to_excel(TLS_writer, sheet_name='HHI_index')
# Create matplotlib figure
fig = plt.figure()
ax = fig.add_subplot(111)
#Add Labels and legends to the plot
fig.suptitle("Public HHI of TLS Aug 2019", fontsize=20)
plt.xlabel('Dates', fontsize=18)
#fixed typo: "Pulblic-HHI" -> "Public-HHI"
plt.ylabel('Public-HHI', fontsize=16)
#Plot the Public HHI; the line is labelled so plt.legend() has an entry
#(an unlabelled artist would leave the legend empty)
plt.plot(TLS_daily_hhi_df2["Date"],TLS_daily_hhi_df2["Public-HHI"],label="Public-HHI")
plt.legend() 
Out[33]:
<matplotlib.legend.Legend at 0x2b368eabcf8>
In [34]:
#Saving into a dataframe
TLS_daily_hhi_df2=pd.DataFrame.from_dict(daily_dataframe)
#Sort according to date
TLS_daily_hhi_df2=TLS_daily_hhi_df2.sort_values("Date")
# Create matplotlib figure
fig = plt.figure()
fig.suptitle("Total HHI of TLS Aug 2019", fontsize=20)
plt.xlabel('Dates', fontsize=18)
plt.ylabel('Total-HHI', fontsize=16)
ax = fig.add_subplot(111)
#Plot the Total HHI; the line is labelled so plt.legend() has an entry
#(an unlabelled artist would leave the legend empty)
plt.plot(TLS_daily_hhi_df2["Date"],TLS_daily_hhi_df2["Total-HHI"],label="Total-HHI")
plt.legend() 
Out[34]:
<matplotlib.legend.Legend at 0x2b34297f5f8>

10min pattern

In [35]:
import os
import pandas as pd
import numpy as np
import datetime
import copy

#Reading the file
cdf = pd.read_excel("TLS  Course of Sales.xlsx", sheet_name=None)

#Concatenating all the sheets to a single sheet
out_df = pd.concat(cdf,ignore_index=True)

#Extracting only the ChiX trades
ChiXdf=out_df.loc[out_df["DataSource"]=="CXA"]

#Grouping the trade condcodes into a tradetype and create a column for it

m = {"CHIXblock": ["SP","SX","SXDE"], "CHIXother": ["PC","PO","ET","MC","LT"], "CHIXNBBO": ["NX","NXXT"],"CHIXon-market hidden":["HL","BR"], "CHIXon-market lit": [np.nan,"XT","S","B"]}
m2 = {v: k for k,vv in m.items() for v in vv}
ChiXdf["Tradetype"]=ChiXdf.CondCodes.map(m2).astype("category", categories=set(m2.values()))

import os
import pandas as pd
import numpy as np
import datetime
import copy
import matplotlib.pyplot as plt 
#Reading the file
cdf = pd.read_excel("TLS  Course of Sales.xlsx", sheet_name=None)
#Concatenating all the sheets to a single sheet
out_df = pd.concat(cdf,ignore_index=True)
#Extracting only the trades with reason=MATCH
matching=out_df.loc[out_df["Reason"]=="MATCH"]
#Extracting only the trades with reason!=MATCH
non_auction=out_df.loc[out_df["Reason"]!="MATCH"]

from datetime import time
A=time(hour =9, minute = 59, second = 45)
B=time(hour =10, minute = 10, second = 0)
#Extracting only the trades representing opening auction
openingauction=matching.loc[(matching['TradeTime']>=A)&(matching['TradeTime']<=B)]

from datetime import time
A=time(hour =16, minute = 0, second = 0)
B=time(hour =16, minute = 12, second = 0)
#Extracting only the trades representing closing auction
closingauction=matching.loc[(matching['TradeTime']>=A)&(matching['TradeTime']<=B)]

openingauction["CondCodes"]="OpeningAuction"

closingauction["CondCodes"]="ClosingAuction"
#Concatenating opening and closing auctin dataframes to create total auction dataframe
auction=pd.concat([openingauction,closingauction])
#Concatenating auction and non auctin dataframes to create total dataframe
totaldf=pd.concat([auction,non_auction])
#Extracting only the ASX trades
asxdf=totaldf.loc[totaldf["DataSource"]=="TM"]

#Creating list for various tradetypes
temp=["SX","SP","SA","S0","S1","S2","S3","P1","P2","PT"]
trade_blocklist=["SX","SP","SA","S0","S1","S2","S3","P1","P2","PT"]
for val in temp:
    trade_blocklist.append(val+str("XT"))

    temp=["L","LT","L1","L2","L3","L4","L5"]
trade_otherlist=["L","LT","L1","L2","L3","L4","L5"]
for val in temp:
    trade_otherlist.append(val+str("XT"))

trade_optionslist=["EC","ECXT","EP","EPXT"]
trade_misc=["OSXT","ET","ETXT"]
trade_otherlist=trade_otherlist+trade_optionslist+trade_misc
#Grouping the trade condcodes into a tradetype and create a column for it
mapoftradetype = {"ASXOpenAuction": ["OpeningAuction"], "ASXCloseAuction": ["ClosingAuction"], "ASXNBBO": ["NX","NXXT"],"ASXCentrePt":["CX","CP","CXXT"],"ASXCentrept-Pref":["CE","CEXT"], "ASXOn-market lit": [np.nan,"XT","EQTM"],"ASXBlock":trade_blocklist,"ASXOthers":trade_otherlist}
mappedtradetype = {v: k for k,vv in mapoftradetype.items() for v in vv}
asxdf["Tradetype"]=asxdf.CondCodes.map(mappedtradetype).astype("category", categories=set(mappedtradetype.values()))

#Concatenating asx and chix dataframes to create total dataframe
tempdf=pd.concat([asxdf,ChiXdf])
#Extracting relevant tradetypes
daily_df1=tempdf.loc[tempdf["Tradetype"]=="ASXOpenAuction"]
daily_df2=tempdf.loc[tempdf["Tradetype"]=="ASXCloseAuction"]
daily_df3=tempdf.loc[tempdf["Tradetype"]=="ASXOn-market lit"]
daily_df4=tempdf.loc[tempdf["Tradetype"]=="ASXCentrePt"]
daily_df5=tempdf.loc[tempdf["Tradetype"]=="ASXNBBO"]
daily_df6=tempdf.loc[tempdf["Tradetype"]=="ASXCentrept-Pref"]
daily_df7=tempdf.loc[tempdf["Tradetype"]=="CHIXon-market lit"]
daily_df8=tempdf.loc[tempdf["Tradetype"]=="CHIXNBBO"]
daily_df9=tempdf.loc[tempdf["Tradetype"]=="'CHIXon-market hidden'"]
#Concatenating relevant dataframes to create total dataframe
daily_df=pd.concat([daily_df3,daily_df4,daily_df5,daily_df6,daily_df7,daily_df8,daily_df9])

#Creating 10 min intervals
time1=pd.date_range('1/1/2001', periods=144, freq='10min')

timedict={}
revtimedict={}
revtimedict[0]="Opening Auction"
revtimedict[1000]="Closing Auction"
count=1;
#creating a dictionary for mapping time intervals to numbers
#creating a dictionary mapping numbers to time intervals. 
for timestamp in time1:
    timedict[timestamp.time()]=(count)
    revtimedict[(count)]=timestamp.time()
    count=count+1
In [36]:
#Create a list for mapping the time-interval of each trade.
#For every trade, find the first 10-minute boundary >= its TradeTime and
#record that interval's number.  Relies on timedict preserving insertion
#order (ascending boundaries), which holds on Python 3.7+ dicts.
listoftimes=[]
for i in range(len(daily_df)):
    #print(ChiXdf1.iloc[i]["TradeTime"])
    for timekey in timedict:
        if(daily_df.iloc[i]["TradeTime"]<=timekey):
            listoftimes.append(timedict[timekey])
            break
#Appending the created list as a column
#NOTE(review): if any trade fell after the last boundary nothing would be
#appended and this assignment would fail on a length mismatch — assumed not
#to occur since the grid covers the full day; verify if the grid changes
daily_df["timeblock"]=listoftimes
# daily_df.to_csv("GGGG.csv")
# daily_df.to_csv("GGGG.csv")
In [37]:
#Saving the dataframe in the csv
#daily_df.to_csv("TLS_timeblock.csv")
In [39]:
import os
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from datetime import datetime
import copy
#Reading the timeblock csv
#daily_df=pd.read_csv("TLS_timeblock.csv")

#Opening and closing auctions get the sentinel interval numbers 0 and 1000
daily_df1["timeblock"]=0
daily_df2["timeblock"]=1000
#Appending the opening and closing auctions
final_daily_df=pd.concat([daily_df1,daily_df2,daily_df])

#Grouping by time-interval
tempdf=final_daily_df.groupby("timeblock").sum()
#Resetting the index to get labels
tempdf.reset_index(inplace=True)

def converttimeback(block_number):
    """Map an interval number back to its time-of-day (or auction label)."""
    return revtimedict[block_number]
#Converting the timeblock numbers back to times for plotting
tempdf["timeblock2"]=tempdf["timeblock"].apply(converttimeback)
#Sorting by timeblock
tempdf=tempdf.sort_values("timeblock")
#setting the index as mapped-numbers. This shall help in joining 
tempdf=tempdf.set_index('timeblock2')
#grouping by timeblock
gg=final_daily_df.groupby("timeblock")
#Calculating the size of each interval
gp=gg.size()
#converting it to a dataframe
tr=gp.to_frame()
#Resetting index to get labels
tr.reset_index(inplace=True)
#storing the time-block numbers (the original comments on l1/l2 were swapped)
l1=list(tr["timeblock"].values)
#storing the number of trades in each time-interval
l2=list(tr[0])
#mapping each interval number back to its label; the loop variable is NOT
#named `time`, which would shadow datetime.time imported earlier
l3=[]
for block_number in l1:
    l3.append(revtimedict[block_number])
#Creating the per-interval summary dataframe
dfff=pd.DataFrame(list(zip(l1, l2,l3)), columns =['timeblock1', 'numberoftrades',"timeblock2"])
#setting the index as mapped-numbers. This shall help in joining 
dfff=dfff.set_index("timeblock2")
#joining the size dataframe with the dataframe consisting TradeValue
final_10min_df=dfff.join(tempdf)
#Selecting relevant columns
final_10min_df = final_10min_df[['numberoftrades',"TradeValue",'timeblock']]
#Resetting index to get labels
final_10min_df.reset_index(inplace=True)
#Writing the dataframe in the summary file
final_10min_df.to_excel(TLS_writer, sheet_name='Intraday_stats')
# Create matplotlib figure
fig = plt.figure() 
ax = fig.add_subplot(111)
#Ploting the TradeValues
final_10min_df["TradeValue"].plot(kind='bar',color='y')
#Plotting the number of trades
final_10min_df['numberoftrades'].plot(kind='line', secondary_y=True)
#Setting the labels of the axes.
ax.set_xticklabels(final_10min_df["timeblock2"], rotation=90)
ax.set_ylabel('DollarValue in millions')
ax.set_xlabel('Time')
def y_fmt(y, pos):
    """Tick formatter: render y with an SI-style suffix (G/M/k).

    Picks the largest decade (1e9, 1e6, 1e3, 1e0) not exceeding |y|,
    divides by it, and trims a trailing ".0" so e.g. 2000000 -> "2 M"
    and 1500 -> "1.5 k".  Zero renders as "0"; values with |y| < 1 are
    returned unchanged (pos is unused but required by FuncFormatter).
    """
    if y == 0:
        return str(0)
    for divisor, suffix in zip([1e9, 1e6, 1e3, 1e0], ["G", "M", "k", ""]):
        if np.abs(y) < divisor:
            continue
        val = y / float(divisor)
        frac = str(val).split(".")[1]
        if len(frac) == 0:
            return '{val:d} {suffix}'.format(val=int(val), suffix=suffix)
        if len(frac) == 1 and frac == "0":
            # e.g. "2.0" -> "2 M": drop the redundant fractional digit
            return '{val:d} {suffix}'.format(val=int(round(val)), suffix=suffix)
        fmt = "{" + "val:.{signf}f".format(signf=len(frac)) + "} {suffix}"
        return fmt.format(val=val, suffix=suffix)
    return y
#Formatting the y-axis tick labels in a human readable way (k/M/G suffixes)
ax.yaxis.set_major_formatter(tick.FuncFormatter(y_fmt))
#Adding subtitle and legends
fig.suptitle("Intraday stats of TLS Aug 2019", fontsize=20)
plt.legend(loc='upper right')
#NOTE(review): style is applied after plotting, so it likely affects only
#subsequent figures — move before the plot calls if the style should apply
plt.style.use('seaborn')
plt.show()
In [40]:
#Saving the summary workbook (all sheets written via TLS_writer above)
#NOTE(review): ExcelWriter.save() is deprecated in newer pandas in favour of
#.close() — keep save() while the notebook targets the older pandas used here
TLS_writer.save()
In [ ]: